cd ../..
/Users/shanekercheval/repos/data-science-template
%run "source/config/notebook_settings.py"
# --- Setup: imports, project config, and MLflow client ---------------------
import os

import mlflow
from mlflow.tracking import MlflowClient

import helpsk as hlp
from helpsk.utility import read_pickle

from source.library.utilities import Timer, log_info, get_config

# Load project configuration and connect to the MLflow tracking server.
config = get_config()
mlflow_uri = config['MLFLOW']['URI']
log_info(f"MLFlow URI: {mlflow_uri}")
# FIX: the tracking URI was hard-coded to 'http://127.0.0.1:1234', silently
# ignoring the value read from config two lines above. Use the configured URI
# so changing the config file actually changes where the client connects.
client = MlflowClient(tracking_uri=mlflow_uri)
2022-06-10 14:46:55 - INFO | MLFlow URI: http://127.0.0.1:1234
# Locate the experiment by name and pull the artifacts of its most recent run.
credit_experiment = client.get_experiment_by_name(name=config['MLFLOW']['EXPERIMENT_NAME'])
runs = client.list_run_infos(experiment_id=credit_experiment.experiment_id)
# FIX: was `runs[np.argmax([x.start_time for x in runs])]`, but numpy (`np`)
# is never imported in this notebook's visible code; the builtin max() does
# the same job without the implicit dependency on notebook_settings.py.
latest_run = max(runs, key=lambda run: run.start_time)
# Download the experiment summary (search results) and the fitted best model.
yaml_path = client.download_artifacts(run_id=latest_run.run_id, path='experiment.yaml')
results = hlp.sklearn_eval.MLExperimentResults.from_yaml_file(yaml_file_name=yaml_path)
best_estimator = read_pickle(client.download_artifacts(
    run_id=latest_run.run_id,
    path='experiment_best_estimator.pkl'
))
best_estimator  # display the winning sklearn pipeline
Pipeline(steps=[('prep',
ColumnTransformer(transformers=[('numeric',
Pipeline(steps=[('imputer',
TransformerChooser(transformer=SimpleImputer(strategy='median'))),
('scaler',
TransformerChooser()),
('pca',
TransformerChooser(transformer=PCA(n_components='mle')))]),
['duration', 'credit_amount',
'installment_commitment',
'residence_since', 'age',
'existing_credi...
'savings_status',
'employment',
'personal_status',
'other_parties',
'property_magnitude',
'other_payment_plans',
'housing', 'job',
'own_telephone',
'foreign_worker'])])),
('model',
RandomForestClassifier(criterion='entropy', max_depth=70,
max_features=0.1142268477118407,
max_samples=0.5483119512487002,
min_samples_leaf=8,
min_samples_split=12, n_estimators=553,
                                       random_state=42))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
Pipeline(steps=[('prep',
ColumnTransformer(transformers=[('numeric',
Pipeline(steps=[('imputer',
TransformerChooser(transformer=SimpleImputer(strategy='median'))),
('scaler',
TransformerChooser()),
('pca',
TransformerChooser(transformer=PCA(n_components='mle')))]),
['duration', 'credit_amount',
'installment_commitment',
'residence_since', 'age',
'existing_credi...
'savings_status',
'employment',
'personal_status',
'other_parties',
'property_magnitude',
'other_payment_plans',
'housing', 'job',
'own_telephone',
'foreign_worker'])])),
('model',
RandomForestClassifier(criterion='entropy', max_depth=70,
max_features=0.1142268477118407,
max_samples=0.5483119512487002,
min_samples_leaf=8,
min_samples_split=12, n_estimators=553,
                                       random_state=42))])
ColumnTransformer(transformers=[('numeric',
Pipeline(steps=[('imputer',
TransformerChooser(transformer=SimpleImputer(strategy='median'))),
('scaler',
TransformerChooser()),
('pca',
TransformerChooser(transformer=PCA(n_components='mle')))]),
['duration', 'credit_amount',
'installment_commitment', 'residence_since',
'age', 'existing_credits',
'num_dependents']),
('non_numeric',
Pipeline(steps=[('encoder',
TransformerChooser(transformer=OneHotEncoder(handle_unknown='ignore')))]),
['checking_status', 'credit_history',
'purpose', 'savings_status', 'employment',
'personal_status', 'other_parties',
'property_magnitude', 'other_payment_plans',
'housing', 'job', 'own_telephone',
                                  'foreign_worker'])])
['duration', 'credit_amount', 'installment_commitment', 'residence_since', 'age', 'existing_credits', 'num_dependents']
TransformerChooser(transformer=SimpleImputer(strategy='median'))
SimpleImputer(strategy='median')
SimpleImputer(strategy='median')
TransformerChooser()
TransformerChooser(transformer=PCA(n_components='mle'))
PCA(n_components='mle')
PCA(n_components='mle')
['checking_status', 'credit_history', 'purpose', 'savings_status', 'employment', 'personal_status', 'other_parties', 'property_magnitude', 'other_payment_plans', 'housing', 'job', 'own_telephone', 'foreign_worker']
TransformerChooser(transformer=OneHotEncoder(handle_unknown='ignore'))
OneHotEncoder(handle_unknown='ignore')
OneHotEncoder(handle_unknown='ignore')
RandomForestClassifier(criterion='entropy', max_depth=70,
max_features=0.1142268477118407,
max_samples=0.5483119512487002, min_samples_leaf=8,
                       min_samples_split=12, n_estimators=553, random_state=42)
client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl')
'/Users/shanekercheval/repos/data-science-template/mlflow-artifact-root/1/2702b1f57ead444a81097fc2673ef18d/artifacts/x_train.pkl'
# Load the exact train/test splits that were logged with the run, so the
# evaluation below uses the same data the model was trained/tested against.
with Timer("Loading training/test datasets"):
    # FIX: was `pd.pandas.read_pickle` — that works only via the accidental
    # `pandas.pandas` self-reference; call `pd.read_pickle` directly.
    X_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl'))
    X_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_test.pkl'))
    y_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_train.pkl'))
    y_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_test.pkl'))
2022-06-10 14:46:55 - INFO | *****Timer Started: Loading training/test datasets
2022-06-10 14:46:55 - INFO | *****Timer Finished (0.00 seconds)
# Sanity-check the split sizes (logged output shows 800 train / 200 test rows,
# 20 feature columns each).
log_info(X_train.shape)
log_info(len(y_train))
log_info(X_test.shape)
log_info(len(y_test))
2022-06-10 14:46:55 - INFO | (800, 20)
2022-06-10 14:46:55 - INFO | 800
2022-06-10 14:46:55 - INFO | (200, 20)
2022-06-10 14:46:55 - INFO | 200
np.unique(y_train, return_counts=True)
(array([0, 1]), array([559, 241]))
np.unique(y_train, return_counts=True)[1] / np.sum(np.unique(y_train, return_counts=True)[1])
array([0.69875, 0.30125])
np.unique(y_test, return_counts=True)[1] / np.sum(np.unique(y_test, return_counts=True)[1])
array([0.705, 0.295])
log_info(f"Best Score: {results.best_score}")
2022-06-10 14:46:55 - INFO | Best Score: 0.7626843675980791
log_info(f"Best Params: {results.best_params}")
2022-06-10 14:46:55 - INFO | Best Params: {'model': 'RandomForestClassifier()', 'max_features': 0.1142268477118407, 'max_depth': 70, 'n_estimators': 553, 'min_samples_split': 12, 'min_samples_leaf': 8, 'max_samples': 0.5483119512487002, 'criterion': 'entropy', 'imputer': "SimpleImputer(strategy='median')", 'scaler': 'None', 'pca': "PCA('mle')", 'encoder': 'OneHotEncoder()'}
# Best model from each model-type: rank trials within each model family by
# mean roc_auc and keep the top trial per family.
df = results.to_formatted_dataframe(return_style=False, include_rank=True)
df["model_rank"] = (
    df.groupby("model")["roc_auc Mean"]
      .rank(method="first", ascending=False)
)
df[df["model_rank"] == 1]
| rank | roc_auc Mean | roc_auc 95CI.LO | roc_auc 95CI.HI | model | C | max_features | max_depth | n_estimators | min_samples_split | min_samples_leaf | max_samples | criterion | learning_rate | min_child_weight | subsample | colsample_bytree | colsample_bylevel | reg_alpha | reg_lambda | imputer | scaler | pca | encoder | model_rank | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 13 | 1 | 0.76 | 0.71 | 0.81 | RandomForestClassifier() | NaN | 0.11 | 70.00 | 553.00 | 12.00 | 8.00 | 0.55 | entropy | NaN | NaN | NaN | NaN | NaN | NaN | NaN | SimpleImputer(strategy='median') | None | PCA('mle') | OneHotEncoder() | 1.00 |
| 0 | 3 | 0.76 | 0.71 | 0.81 | LogisticRegression() | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | SimpleImputer() | StandardScaler() | None | OneHotEncoder() | 1.00 |
| 7 | 4 | 0.76 | 0.71 | 0.80 | ExtraTreesClassifier() | NaN | 0.70 | 38.00 | 558.00 | 18.00 | 1.00 | 0.51 | gini | NaN | NaN | NaN | NaN | NaN | NaN | NaN | SimpleImputer() | None | PCA('mle') | OneHotEncoder() | 1.00 |
| 19 | 11 | 0.73 | 0.70 | 0.76 | XGBClassifier() | NaN | NaN | 15.00 | 1159.00 | NaN | NaN | NaN | NaN | 0.03 | 29.00 | 0.83 | 0.52 | 0.50 | 0.00 | 1.84 | SimpleImputer(strategy='median') | None | PCA('mle') | CustomOrdinalEncoder() | 1.00 |
# Full styled leaderboard of all trials (capped at 500 rows).
results.to_formatted_dataframe(
    return_style=True, include_rank=True, num_rows=500,
)
| rank | roc_auc Mean | roc_auc 95CI.LO | roc_auc 95CI.HI | model | C | max_features | max_depth | n_estimators | min_samples_split | min_samples_leaf | max_samples | criterion | learning_rate | min_child_weight | subsample | colsample_bytree | colsample_bylevel | reg_alpha | reg_lambda | imputer | scaler | pca | encoder |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 1 | 0.763 | 0.712 | 0.814 | RandomForestClassifier() | <NA> | 0.114 | 70.000 | 553.000 | 12.000 | 8.000 | 0.548 | entropy | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | None | PCA('mle') | OneHotEncoder() |
| 2 | 0.759 | 0.699 | 0.819 | RandomForestClassifier() | <NA> | 0.030 | 84.000 | 1,088.000 | 24.000 | 36.000 | 0.981 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | None | OneHotEncoder() |
| 3 | 0.759 | 0.713 | 0.805 | LogisticRegression() | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | StandardScaler() | None | OneHotEncoder() |
| 4 | 0.759 | 0.714 | 0.804 | ExtraTreesClassifier() | <NA> | 0.700 | 38.000 | 558.000 | 18.000 | 1.000 | 0.514 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | PCA('mle') | OneHotEncoder() |
| 5 | 0.756 | 0.726 | 0.786 | RandomForestClassifier() | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | None | OneHotEncoder() |
| 6 | 0.752 | 0.684 | 0.819 | LogisticRegression() | 0.001 | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | MinMaxScaler() | None | OneHotEncoder() |
| 7 | 0.744 | 0.709 | 0.780 | RandomForestClassifier() | <NA> | 0.681 | 38.000 | 1,461.000 | 23.000 | 10.000 | 0.553 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | None | None | CustomOrdinalEncoder() |
| 8 | 0.742 | 0.689 | 0.796 | ExtraTreesClassifier() | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | None | OneHotEncoder() |
| 9 | 0.742 | 0.703 | 0.781 | ExtraTreesClassifier() | <NA> | 0.473 | 25.000 | 834.000 | 48.000 | 6.000 | 0.631 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | PCA('mle') | CustomOrdinalEncoder() |
| 10 | 0.741 | 0.689 | 0.793 | RandomForestClassifier() | <NA> | 0.559 | 72.000 | 1,330.000 | 35.000 | 32.000 | 0.630 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='most_frequent') | None | PCA('mle') | OneHotEncoder() |
| 11 | 0.726 | 0.698 | 0.755 | XGBClassifier() | <NA> | <NA> | 15.000 | 1,159.000 | <NA> | <NA> | <NA> | <NA> | 0.032 | 29.000 | 0.834 | 0.520 | 0.503 | 0.003 | 1.839 | SimpleImputer(strategy='median') | None | PCA('mle') | CustomOrdinalEncoder() |
| 12 | 0.726 | 0.689 | 0.762 | LogisticRegression() | 0.000 | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | StandardScaler() | None | CustomOrdinalEncoder() |
| 13 | 0.723 | 0.637 | 0.809 | LogisticRegression() | 0.403 | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | StandardScaler() | None | CustomOrdinalEncoder() |
| 14 | 0.723 | 0.666 | 0.779 | ExtraTreesClassifier() | <NA> | 0.857 | 30.000 | 879.000 | 17.000 | 28.000 | 0.563 | entropy | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | None | None | CustomOrdinalEncoder() |
| 15 | 0.722 | 0.658 | 0.786 | ExtraTreesClassifier() | <NA> | 0.781 | 50.000 | 590.000 | 35.000 | 47.000 | 0.846 | gini | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | None | PCA('mle') | OneHotEncoder() |
| 16 | 0.720 | 0.699 | 0.741 | XGBClassifier() | <NA> | <NA> | 19.000 | 1,217.000 | <NA> | <NA> | <NA> | <NA> | 0.042 | 4.000 | 0.893 | 0.936 | 0.652 | 0.029 | 3.708 | SimpleImputer(strategy='most_frequent') | None | None | CustomOrdinalEncoder() |
| 17 | 0.720 | 0.638 | 0.801 | LogisticRegression() | 3.489 | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer(strategy='median') | StandardScaler() | PCA('mle') | CustomOrdinalEncoder() |
| 18 | 0.718 | 0.695 | 0.740 | XGBClassifier() | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | None | OneHotEncoder() |
| 19 | 0.716 | 0.666 | 0.766 | XGBClassifier() | <NA> | <NA> | 12.000 | 1,471.000 | <NA> | <NA> | <NA> | <NA> | 0.126 | 8.000 | 0.912 | 0.569 | 0.821 | 0.045 | 1.029 | SimpleImputer() | None | PCA('mle') | CustomOrdinalEncoder() |
| 20 | 0.711 | 0.659 | 0.764 | XGBClassifier() | <NA> | <NA> | 5.000 | 1,218.000 | <NA> | <NA> | <NA> | <NA> | 0.115 | 2.000 | 0.545 | 0.648 | 0.852 | 0.123 | 1.165 | SimpleImputer(strategy='median') | None | PCA('mle') | CustomOrdinalEncoder() |
results.to_formatted_dataframe(query='model == "RandomForestClassifier()"', include_rank=True)
| rank | roc_auc Mean | roc_auc 95CI.LO | roc_auc 95CI.HI | max_features | max_depth | n_estimators | min_samples_split | min_samples_leaf | max_samples | criterion | imputer | pca | encoder |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 1 | 0.763 | 0.712 | 0.814 | 0.114 | 70.000 | 553.000 | 12.000 | 8.000 | 0.548 | entropy | SimpleImputer(strategy='median') | PCA('mle') | OneHotEncoder() |
| 2 | 0.759 | 0.699 | 0.819 | 0.030 | 84.000 | 1,088.000 | 24.000 | 36.000 | 0.981 | gini | SimpleImputer() | None | OneHotEncoder() |
| 3 | 0.756 | 0.726 | 0.786 | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | <NA> | SimpleImputer() | None | OneHotEncoder() |
| 4 | 0.744 | 0.709 | 0.780 | 0.681 | 38.000 | 1,461.000 | 23.000 | 10.000 | 0.553 | gini | SimpleImputer(strategy='median') | None | CustomOrdinalEncoder() |
| 5 | 0.741 | 0.689 | 0.793 | 0.559 | 72.000 | 1,330.000 | 35.000 | 32.000 | 0.630 | gini | SimpleImputer(strategy='most_frequent') | PCA('mle') | OneHotEncoder() |
results.to_formatted_dataframe(query='model == "LogisticRegression()"', include_rank=True)
| rank | roc_auc Mean | roc_auc 95CI.LO | roc_auc 95CI.HI | C | imputer | scaler | pca | encoder |
|---|---|---|---|---|---|---|---|---|
| 1 | 0.759 | 0.713 | 0.805 | <NA> | SimpleImputer() | StandardScaler() | None | OneHotEncoder() |
| 2 | 0.752 | 0.684 | 0.819 | 0.001 | SimpleImputer(strategy='median') | MinMaxScaler() | None | OneHotEncoder() |
| 3 | 0.726 | 0.689 | 0.762 | 0.000 | SimpleImputer(strategy='median') | StandardScaler() | None | CustomOrdinalEncoder() |
| 4 | 0.723 | 0.637 | 0.809 | 0.403 | SimpleImputer(strategy='median') | StandardScaler() | None | CustomOrdinalEncoder() |
| 5 | 0.720 | 0.638 | 0.801 | 3.489 | SimpleImputer(strategy='median') | StandardScaler() | PCA('mle') | CustomOrdinalEncoder() |
# Per-trial performance over the course of the search, faceted by model type.
results.plot_performance_across_trials(facet_by='model').show()
# Same view restricted to the winning model family.
results.plot_performance_across_trials(query='model == "RandomForestClassifier()"').show()
# How each sampled hyperparameter value evolved across trials.
results.plot_parameter_values_across_trials(query='model == "RandomForestClassifier()"').show()
# results.plot_scatter_matrix(query='model == "RandomForestClassifier()"',
# height=1000, width=1000).show()
# Score vs each numeric hyperparameter.
results.plot_performance_numeric_params(query='model == "RandomForestClassifier()"',
height=800)
# Parallel-coordinates view of hyperparameter combinations vs score.
results.plot_parallel_coordinates(query='model == "RandomForestClassifier()"').show()
# Score vs each non-numeric hyperparameter (e.g. imputer/encoder choice).
results.plot_performance_non_numeric_params(query='model == "RandomForestClassifier()"').show()
# Score as a function of max_features, point size = max_depth, color = encoder.
results.plot_score_vs_parameter(
query='model == "RandomForestClassifier()"',
parameter='max_features',
size='max_depth',
color='encoder',
)
# results.plot_parameter_vs_parameter(
# query='model == "XGBClassifier()"',
# parameter_x='colsample_bytree',
# parameter_y='learning_rate',
# size='max_depth'
# )
# results.plot_parameter_vs_parameter(
# query='model == "XGBClassifier()"',
# parameter_x='colsample_bytree',
# parameter_y='learning_rate',
# size='imputer'
# )
# Score the hold-out set: predicted probability of the positive class
# (column 1 of predict_proba) for every test observation.
test_predictions = best_estimator.predict_proba(X_test)[:, 1]
test_predictions[:10]  # peek at the first ten scores
array([0.37346429, 0.4104999 , 0.48390757, 0.35895319, 0.17376038,
0.32902654, 0.17785134, 0.40538264, 0.21201897, 0.24403248])
# Evaluate the hold-out predictions as a two-class problem.
# NOTE(review): score_threshold=0.37 appears hand-picked — confirm how it was
# chosen (e.g. from the threshold-tradeoff curves further below).
evaluator = hlp.sklearn_eval.TwoClassEvaluator(
actual_values=y_test,
predicted_scores=test_predictions,
score_threshold=0.37
)
# Distribution of predicted scores, split by actual class.
evaluator.plot_actual_vs_predict_histogram()
evaluator.plot_confusion_matrix()
# Metric table with dummy-classifier baselines for comparison.
evaluator.all_metrics_df(return_style=True,
dummy_classifier_strategy=['prior', 'constant'],
round_by=3)
| Score | Dummy (prior) | Dummy (constant) | Explanation | |
|---|---|---|---|---|
| AUC | 0.794 | 0.500 | 0.500 | Area under the ROC curve (true pos. rate vs false pos. rate); ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier) |
| True Positive Rate | 0.593 | 0.000 | 1.000 | 59.3% of positive instances were correctly identified.; i.e. 35 "Positive Class" labels were correctly identified out of 59 instances; a.k.a Sensitivity/Recall |
| True Negative Rate | 0.837 | 1.000 | 0.000 | 83.7% of negative instances were correctly identified.; i.e. 118 "Negative Class" labels were correctly identified out of 141 instances |
| False Positive Rate | 0.163 | 0.000 | 1.000 | 16.3% of negative instances were incorrectly identified as positive; i.e. 23 "Negative Class" labels were incorrectly identified as "Positive Class", out of 141 instances |
| False Negative Rate | 0.407 | 1.000 | 0.000 | 40.7% of positive instances were incorrectly identified as negative; i.e. 24 "Positive Class" labels were incorrectly identified as "Negative Class", out of 59 instances |
| Positive Predictive Value | 0.603 | 0.000 | 0.295 | When the model claims an instance is positive, it is correct 60.3% of the time; i.e. out of the 58 times the model predicted "Positive Class", it was correct 35 times; a.k.a precision |
| Negative Predictive Value | 0.831 | 0.705 | 0.000 | When the model claims an instance is negative, it is correct 83.1% of the time; i.e. out of the 142 times the model predicted "Negative Class", it was correct 118 times |
| F1 Score | 0.598 | 0.000 | 0.456 | The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. |
| Precision/Recall AUC | 0.642 | 0.295 | 0.295 | Precision/Recall AUC is calculated with `average_precision` which summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold. See sci-kit learn documentation for caveats. |
| Accuracy | 0.765 | 0.705 | 0.295 | 76.5% of instances were correctly identified |
| Error Rate | 0.235 | 0.295 | 0.705 | 23.5% of instances were incorrectly identified |
| % Positive | 0.295 | 0.295 | 0.295 | 29.5% of the data are positive; i.e. out of 200 total observations; 59 are labeled as "Positive Class" |
| Total Observations | 200 | 200 | 200 | There are 200 total observations; i.e. sample size |
evaluator.plot_roc_auc_curve().show()
<Figure size 720x444.984 with 0 Axes>
# Precision/recall curve on the hold-out set.
evaluator.plot_precision_recall_auc_curve().show()
# Metric values as the score threshold varies.
evaluator.plot_threshold_curves(score_threshold_range=(0.1, 0.7)).show()
evaluator.plot_precision_recall_tradeoff(score_threshold_range=(0.1, 0.6)).show()
# Gain/lift table by predicted-score percentile.
evaluator.calculate_lift_gain(return_style=True)
| Gain | Lift | |
|---|---|---|
| Percentile | ||
| 5 | 0.17 | 3.39 |
| 10 | 0.25 | 2.54 |
| 15 | 0.32 | 2.15 |
| 20 | 0.42 | 2.12 |
| 25 | 0.53 | 2.10 |
| 30 | 0.61 | 2.03 |
| 35 | 0.68 | 1.94 |
| 40 | 0.75 | 1.86 |
| 45 | 0.76 | 1.69 |
| 50 | 0.80 | 1.59 |
| 55 | 0.80 | 1.45 |
| 60 | 0.88 | 1.47 |
| 65 | 0.88 | 1.36 |
| 70 | 0.92 | 1.31 |
| 75 | 0.93 | 1.24 |
| 80 | 0.97 | 1.21 |
| 85 | 0.98 | 1.16 |
| 90 | 1.00 | 1.11 |
| 95 | 1.00 | 1.05 |
| 100 | 1.00 | 1.00 |